PPC_C_WARNINGS += -Wundef -Wmissing-prototypes -Wmissing-declarations
CFLAGS += $(PPC_C_WARNINGS)
-LINK=0x3000000
+LINK=0x400000
boot32_link_base = $(LINK)
xen_link_offset = 100
xen_link_base = $(patsubst %000,%$(xen_link_offset),$(LINK))
int arch_domain_create(struct domain *d)
{
unsigned long rma_base;
- unsigned long rma_size;
+ unsigned long rma_sz;
uint htab_order;
if (d->domain_id == IDLE_DOMAIN_ID) {
}
d->arch.rma_order = cpu_rma_order();
- rma_size = 1UL << d->arch.rma_order << PAGE_SHIFT;
+ rma_sz = rma_size(d->arch.rma_order);
/* allocate the real mode area */
d->max_pages = 1UL << d->arch.rma_order;
+ d->tot_pages = 0;
d->arch.rma_page = alloc_domheap_pages(d, d->arch.rma_order, 0);
if (NULL == d->arch.rma_page)
return 1;
rma_base = page_to_maddr(d->arch.rma_page);
- BUG_ON(rma_base & (rma_size-1)); /* check alignment */
+ BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */
- printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_size);
- memset((void *)rma_base, 0, rma_size);
+ printk("clearing RMO: 0x%lx[0x%lx]\n", rma_base, rma_sz);
+ memset((void *)rma_base, 0, rma_sz);
d->shared_info = (shared_info_t *)
(rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);
void arch_domain_destroy(struct domain *d)
{
- free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
htab_free(d);
}
void domain_relinquish_resources(struct domain *d)
{
- /* nothing to do? */
+ free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
}
void arch_dump_domain_info(struct domain *d)
htab_raddr = (ulong)alloc_xenheap_pages(order);
ASSERT(htab_raddr != 0);
/* XXX check alignment guarantees */
- ASSERT((htab_raddr & (htab_bytes-1)) == 0);
+ ASSERT((htab_raddr & (htab_bytes - 1)) == 0);
/* XXX slow. move memset out to service partition? */
memset((void *)htab_raddr, 0, htab_bytes);
+ d->arch.htab.order = order;
d->arch.htab.log_num_ptes = log_htab_bytes - LOG_PTE_SIZE;
d->arch.htab.sdr1 = htab_calc_sdr1(htab_raddr, log_htab_bytes);
d->arch.htab.map = (union pte *)htab_raddr;
d->arch.htab.shadow = xmalloc_array(ulong,
1UL << d->arch.htab.log_num_ptes);
ASSERT(d->arch.htab.shadow != NULL);
-
- printf("%s: dom%x sdr1: %lx\n", __func__, d->domain_id, d->arch.htab.sdr1);
}
void htab_free(struct domain *d)
{
ulong htab_raddr = GET_HTAB(d);
- free_xenheap_pages((void *)htab_raddr,
- (1UL << d->arch.htab.log_num_ptes) << LOG_PTE_SIZE);
+ free_xenheap_pages((void *)htab_raddr, d->arch.htab.order);
xfree(d->arch.htab.shadow);
}
void shadow_drop_references(
struct domain *d, struct page_info *page)
{
- panic("%s\n", __func__);
}
unsigned int cpu_rma_order(void)
{
/* XXX what about non-HV mode? */
- return 14; /* 1<<14<<PAGE_SIZE = 64M */
+ uint rma_log_size = 6 + 20; /* 64M */
+ return rma_log_size - PAGE_SHIFT;
}
void cpu_initialize(void)
printk("System RAM: %luMB (%lukB)\n", eomem >> 20, eomem >> 10);
+ /* top of memory */
max_page = PFN_DOWN(ALIGN_DOWN(eomem, PAGE_SIZE));
total_pages = max_page;
- /* skip the exception handlers */
+ /* Architecturally the first 4 pages are exception handlers, we
+ * will also be copying down some code there */
heap_start = init_boot_allocator(4 << PAGE_SHIFT);
+ /* we give the first RMA to the hypervisor */
+ xenheap_phys_end = rma_size(cpu_rma_order());
+
+ /* allow everything else to be allocated */
+ init_boot_pages(xenheap_phys_end, eomem);
+ init_frametable();
+ end_boot_allocator();
+
+ /* Add memory between the beginning of the heap and the beginning
+ * of our text */
+ init_xenheap_pages(heap_start, (ulong)_start);
+
/* move the modules to just after _end */
if (modules_start) {
printk("modules at: %016lx - %016lx\n", modules_start,
modules_start + modules_size);
}
+ /* the rest of the xenheap, starting at the end of modules */
+ init_xenheap_pages(freemem, xenheap_phys_end);
+
+
#ifdef OF_DEBUG
printk("ofdump:\n");
/* make sure the OF devtree is good */
ofd_walk((void *)oftree, OFD_ROOT, ofd_dump_props, OFD_DUMP_ALL);
#endif
- percpu_init_areas();
-
- /* mark all memory from modules onward as unused */
- init_boot_pages(freemem, eomem);
-
- init_frametable();
- end_boot_allocator();
-
- /* place the heap from after the allocator bitmap to _start */
- xenheap_phys_end = (ulong)_start;
- init_xenheap_pages(heap_start, xenheap_phys_end);
heap_size = xenheap_phys_end - heap_start;
printk("Xen heap: %luMB (%lukB)\n", heap_size >> 20, heap_size >> 10);
+ percpu_init_areas();
+
cpu_initialize();
#ifdef CONFIG_GDB
#define RMA_CONSOLE 3
#define RMA_LAST_DOMU 3
-#define rma_size(rma_order) (1UL << (rma_order) << PAGE_SHIFT)
+#define rma_size(rma_order) (1UL << ((rma_order) + PAGE_SHIFT))
static inline ulong rma_addr(struct arch_domain *ad, int type)
{
struct domain_htab {
ulong sdr1;
- ulong log_num_ptes; /* log number of PTEs in HTAB. */
+ uint log_num_ptes; /* log number of PTEs in HTAB. */
+ uint order; /* order for freeing. */
union pte *map; /* access the htab like an array */
ulong *shadow; /* idx -> logical translation array */
};
#define memguard_unguard_range(_p,_l) ((void)0)
extern unsigned long xenheap_phys_end;
-#define IS_XEN_HEAP_FRAME(_pfn) (page_to_mfn(_pfn) < xenheap_phys_end)
+#define IS_XEN_HEAP_FRAME(_pfn) (page_to_maddr(_pfn) < xenheap_phys_end)
/*
* Per-page-frame information.